From: Alex Williamson Date: Tue, 23 Oct 2007 16:21:31 +0000 (-0600) Subject: [IA64] Prevent softlock when destroying VTi domain X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14846 X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22?a=commitdiff_plain;h=a593b18698b3ff08e4f149648a7709a391fb5b93;p=xen.git [IA64] Prevent softlock when destroying VTi domain Prevent softlockup during VTi domain destruction by making relinquish_memory() continuable. It was assumed that mm_teardown() frees most of page_list so that the list which is passed to relinquish_memory() is short. However the assumption isn't true in the VTi domain case because qemu-dm maps all the domain pages. To avoid the softlockup message, make relinquish_memory() continuable. Signed-off-by: Isaku Yamahata --- diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c index f8e3bff5eb..5d41234cc8 100644 --- a/xen/arch/ia64/xen/domain.c +++ b/xen/arch/ia64/xen/domain.c @@ -584,7 +584,9 @@ int arch_domain_create(struct domain *d) goto fail_nomem; memset(&d->arch.mm, 0, sizeof(d->arch.mm)); + d->arch.relres = RELRES_not_started; d->arch.mm_teardown_offset = 0; + INIT_LIST_HEAD(&d->arch.relmem_list); if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL) goto fail_nomem; @@ -1495,13 +1497,14 @@ int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c) return rc; } -static void relinquish_memory(struct domain *d, struct list_head *list) +static int relinquish_memory(struct domain *d, struct list_head *list) { struct list_head *ent; struct page_info *page; #ifndef __ia64__ unsigned long x, y; #endif + int ret = 0; /* Use a recursive lock, as we may enter 'free_domheap_page'. */ spin_lock_recursive(&d->page_alloc_lock); @@ -1514,6 +1517,7 @@ static void relinquish_memory(struct domain *d, struct list_head *list) { /* Couldn't get a reference -- someone is freeing this page. 
*/ ent = ent->next; + list_move_tail(&page->list, &d->arch.relmem_list); continue; } @@ -1550,35 +1554,72 @@ static void relinquish_memory(struct domain *d, struct list_head *list) /* Follow the list chain and /then/ potentially free the page. */ ent = ent->next; BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY); + list_move_tail(&page->list, &d->arch.relmem_list); put_page(page); + + if (hypercall_preempt_check()) { + ret = -EAGAIN; + goto out; + } } + list_splice_init(&d->arch.relmem_list, list); + + out: spin_unlock_recursive(&d->page_alloc_lock); + return ret; } int domain_relinquish_resources(struct domain *d) { - int ret; - /* Relinquish guest resources for VT-i domain. */ - if (d->arch.is_vti) - vmx_relinquish_guest_resources(d); + int ret = 0; - /* Tear down shadow mode stuff. */ - ret = mm_teardown(d); - if (ret != 0) - return ret; + switch (d->arch.relres) { + case RELRES_not_started: + /* Relinquish guest resources for VT-i domain. */ + if (d->arch.is_vti) + vmx_relinquish_guest_resources(d); + d->arch.relres = RELRES_mm_teardown; + /*fallthrough*/ + + case RELRES_mm_teardown: + /* Tear down shadow mode stuff. */ + ret = mm_teardown(d); + if (ret != 0) + return ret; + d->arch.relres = RELRES_xen; + /* fallthrough */ + + case RELRES_xen: + /* Relinquish every xen page of memory. */ + ret = relinquish_memory(d, &d->xenpage_list); + if (ret != 0) + return ret; + d->arch.relres = RELRES_dom; + /* fallthrough */ + + case RELRES_dom: + /* Relinquish every domain page of memory. */ + ret = relinquish_memory(d, &d->page_list); + if (ret != 0) + return ret; + d->arch.relres = RELRES_done; + /* fallthrough */ + + case RELRES_done: + break; - /* Relinquish every page of memory. 
*/ - relinquish_memory(d, &d->xenpage_list); - relinquish_memory(d, &d->page_list); + default: + BUG(); + } - if (d->arch.is_vti && d->arch.sal_data) - xfree(d->arch.sal_data); + if (d->arch.is_vti && d->arch.sal_data) + xfree(d->arch.sal_data); - /* Free page used by xen oprofile buffer */ - free_xenoprof_pages(d); + /* Free page used by xen oprofile buffer */ + free_xenoprof_pages(d); - return 0; + return 0; } unsigned long diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h index 68e5256f04..e259402dda 100644 --- a/xen/include/asm-ia64/domain.h +++ b/xen/include/asm-ia64/domain.h @@ -192,7 +192,17 @@ struct arch_domain { #endif /* for domctl_destroy_domain continuation */ + enum { + RELRES_not_started, + RELRES_mm_teardown, + RELRES_xen, + RELRES_dom, + RELRES_done, + } relres; + /* Continuable mm_teardown() */ unsigned long mm_teardown_offset; + /* Continuable domain_relinquish_resources() */ + struct list_head relmem_list; }; #define INT_ENABLE_OFFSET(v) \ (sizeof(vcpu_info_t) * (v)->vcpu_id + \